# If nomigrate is set, disable migration
nomigrate = self.info["platform"].get("nomigrate")
- if arch.type == "x86" and nomigrate is not None and long(nomigrate) != 0:
+ if nomigrate is not None and long(nomigrate) != 0:
xc.domain_disable_migrate(self.domid)
# Optionally enable virtual HPET
spin_lock_init(&d->arch.vtsc_lock);
- if ( d->domain_id == 0 )
- d->arch.disable_migrate = 1;
-
return 0;
fail:
}
break;
- case XEN_DOMCTL_disable_migrate:
- {
- struct domain *d;
-
- ret = -ESRCH;
- d = rcu_lock_domain_by_id(domctl->domain);
- if ( d == NULL )
- break;
-
- domain_pause(d);
- d->arch.disable_migrate = domctl->u.disable_migrate.disable;
- domain_unpause(d);
-
- rcu_unlock_domain(d);
- ret = 0;
- }
- break;
-
case XEN_DOMCTL_suppress_spurious_page_faults:
{
struct domain *d;
if ( domcr_flags & DOMCRF_hvm )
d->is_hvm = 1;
- if ( (domid == 0) && opt_dom0_vcpus_pin )
- d->is_pinned = 1;
+ if ( domid == 0 )
+ {
+ d->is_pinned = opt_dom0_vcpus_pin;
+ d->disable_migrate = 1;
+ }
if ( domcr_flags & DOMCRF_dummy )
return d;
}
break;
+ case XEN_DOMCTL_disable_migrate:
+ {
+ struct domain *d;
+ ret = -ESRCH;
+ if ( (d = rcu_lock_domain_by_id(op->domain)) != NULL )
+ {
+ d->disable_migrate = op->u.disable_migrate.disable;
+ rcu_unlock_domain(d);
+ ret = 0;
+ }
+ }
+ break;
+
default:
ret = arch_do_domctl(op, u_domctl);
break;
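/*
 * Illustrative sketch only -- not part of this patch.  A toolstack reaches
 * the XEN_DOMCTL_disable_migrate case above by filling in a xen_domctl and
 * issuing the domctl hypercall for the target domain (this is what the
 * xc.domain_disable_migrate() call in the xend hunk ultimately does).  The
 * include paths and the do_domctl() declaration below are assumptions about
 * the caller's environment, not definitions introduced by this patch.
 */
#include <string.h>
#include <stdint.h>
#include <xen/xen.h>      /* assumed install location of the public headers */
#include <xen/domctl.h>

/* Stand-in for the caller's hypercall plumbing (e.g. libxc's internals). */
extern int do_domctl(int xc_handle, struct xen_domctl *domctl);

static int sketch_disable_migrate(int xc_handle, uint32_t domid)
{
    struct xen_domctl domctl;

    memset(&domctl, 0, sizeof(domctl));
    domctl.cmd = XEN_DOMCTL_disable_migrate;
    domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
    domctl.domain = (domid_t)domid;
    domctl.u.disable_migrate.disable = 1;  /* consumed by the handler above */

    return do_domctl(xc_handle, &domctl);
}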
spinlock_t vtsc_lock;
uint64_t vtsc_kerncount; /* for hvm, counts all vtsc */
uint64_t vtsc_usercount; /* not used for hvm */
-
- /* mark domain as non-migratable and non-restoreable */
- bool_t disable_migrate;
} __cacheline_aligned;
#define has_arch_pdevs(d) (!list_empty(&(d)->arch.pdev_list))
void *tmem;
struct lock_profile_qhead profile_head;
+
+ /* Non-migratable and non-restorable? */
+ bool_t disable_migrate;
};
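/*
 * Illustrative only -- not part of this patch.  Because disable_migrate now
 * lives in the common struct domain rather than struct arch_domain, the
 * common-code writers above (the domctl handler and the dom0 default in
 * domain_create()) and any reader can reach it without arch indirection,
 * e.g. via a hypothetical accessor such as:
 */
static inline bool_t domain_migration_disabled(const struct domain *d)
{
    return d->disable_migrate;
}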
struct domain_setup_info